update_guest_eip();
break;
+ case EXIT_REASON_VMWRITE:
+ if ( nvmx_handle_vmwrite(regs) == X86EMUL_OKAY )
+ update_guest_eip();
+ break;
+
case EXIT_REASON_MWAIT_INSTRUCTION:
case EXIT_REASON_MONITOR_INSTRUCTION:
case EXIT_REASON_VMLAUNCH:
case EXIT_REASON_VMREAD:
case EXIT_REASON_VMRESUME:
- case EXIT_REASON_VMWRITE:
case EXIT_REASON_GETSEC:
case EXIT_REASON_INVEPT:
case EXIT_REASON_INVVPID:
return X86EMUL_OKAY;
}
+/*
+ * Emulate VMWRITE executed by the L1 guest hypervisor.
+ *
+ * Decodes the instruction, reads the VMCS field encoding from the
+ * register operand and stores the decoded source operand into the
+ * software virtual VMCS (nv_vvmcx) for the current vcpu.  If one of the
+ * I/O bitmap address fields was written, the shadow I/O bitmap mapping
+ * is refreshed so it tracks the new guest-physical address.
+ *
+ * Returns X86EMUL_OKAY on success (the caller then advances guest RIP),
+ * or X86EMUL_EXCEPTION when instruction decode fails.
+ */
+int nvmx_handle_vmwrite(struct cpu_user_regs *regs)
+{
+    struct vcpu *v = current;
+    struct vmx_inst_decoded decode;
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    unsigned long operand; 
+    u64 vmcs_encoding;
+
+    /*
+     * Decode the VMWRITE source operand (register or memory).
+     * NOTE(review): on failure this assumes decode_vmx_inst() has
+     * already injected the appropriate fault into the guest — confirm
+     * against its definition.
+     */
+    if ( decode_vmx_inst(regs, &decode, &operand, 0)
+             != X86EMUL_OKAY )
+        return X86EMUL_EXCEPTION;
+
+    /* The VMCS field encoding is supplied in the second (register) operand. */
+    vmcs_encoding = reg_read(regs, decode.reg2);
+    __set_vvmcs(nvcpu->nv_vvmcx, vmcs_encoding, operand);
+
+    /*
+     * A write to an I/O bitmap address field (low or high half) changes
+     * which guest pages back the bitmap, so re-map the affected bitmap.
+     * Other field encodings need no side effects here.
+     */
+    if ( vmcs_encoding == IO_BITMAP_A || vmcs_encoding == IO_BITMAP_A_HIGH )
+        __map_io_bitmap (v, IO_BITMAP_A);
+    else if ( vmcs_encoding == IO_BITMAP_B ||
+              vmcs_encoding == IO_BITMAP_B_HIGH )
+        __map_io_bitmap (v, IO_BITMAP_B);
+
+    /* Report VMsucceed to the guest via its arithmetic flags. */
+    vmreturn(regs, VMSUCCEED);
+    return X86EMUL_OKAY;
+}
+
int nvmx_handle_vmptrld(struct cpu_user_regs *regs);
int nvmx_handle_vmptrst(struct cpu_user_regs *regs);
int nvmx_handle_vmclear(struct cpu_user_regs *regs);
+int nvmx_handle_vmwrite(struct cpu_user_regs *regs);
#endif /* __ASM_X86_HVM_VVMX_H__ */